In [1]:
import glob
import math
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import random
import sklearn.metrics as metrics

from tensorflow.keras import optimizers
from tensorflow.keras import backend
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import add, concatenate, Conv2D, Dense, Dropout, Flatten, Input, Lambda
from tensorflow.keras.layers import Activation, AveragePooling2D, BatchNormalization, MaxPooling2D, ZeroPadding2D
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import to_categorical


%matplotlib inline
In [2]:
                            # Set up 'ggplot' style
# Configure matplotlib: 'ggplot' style sheet (use 'classic' to restore the
# default look), y-axis ticks/labels moved to the right-hand side.
plt.style.use('ggplot')
plt.rcParams.update({
    'ytick.right':      True,    # draw ticks on the right spine
    'ytick.labelright': True,    # and label them there
    'ytick.left':       False,   # suppress left-hand ticks
    'ytick.labelleft':  False,   # and their labels
    'font.family':      'Arial',
})
In [3]:
# where am i?
%pwd
Out[3]:
'C:\\Users\\david\\Documents\\ImageNet'
In [4]:
%ls
 Volume in drive C is Acer
 Volume Serial Number is F2E5-64E8

 Directory of C:\Users\david\Documents\ImageNet

09/16/2019  12:34 AM    <DIR>          .
09/16/2019  12:34 AM    <DIR>          ..
09/09/2019  01:02 AM                43 .gitattributes
08/22/2019  11:06 PM                26 .gitignore
09/15/2019  02:47 PM    <DIR>          .ipynb_checkpoints
09/14/2019  04:34 PM         1,216,519 Create_Train_Test_Set.ipynb
09/14/2019  03:53 PM    <DIR>          data
08/22/2019  11:09 PM           455,126 Download-ImageNet.html
09/09/2019  12:35 AM           288,923 Download-ImageNet.ipynb
09/03/2019  09:40 PM           367,769 Download-Pexels.html
09/09/2019  12:35 AM            94,549 Download-Pexels.ipynb
09/09/2019  01:02 AM        10,518,772 fgs-imgs.npz
09/08/2019  11:18 PM        41,976,052 fgs-imgs128.npz
09/08/2019  11:18 PM        23,611,636 fgs-imgs96.npz
09/14/2019  03:57 PM        49,130,740 fgsOpnImg-imgs96.npz
09/16/2019  12:30 AM               501 FlowerPower.csv
09/16/2019  12:09 AM       523,266,328 FlowerPower.hdf5
09/14/2019  03:06 PM       226,409,716 flr102-imgs96.npz
09/09/2019  01:02 AM        15,728,884 flr-imgs.npz
09/08/2019  11:18 PM        62,374,132 flr-imgs128.npz
09/08/2019  11:18 PM        35,085,556 flr-imgs96.npz
09/09/2019  01:02 AM        13,295,860 flrnonflr-test-imgs.npz
09/08/2019  11:18 PM        52,445,428 flrnonflr-test-imgs128.npz
09/08/2019  11:18 PM        29,500,660 flrnonflr-test-imgs96-0.8.npz
09/14/2019  04:13 PM       102,187,252 flrnonflr-test-imgs96-0.8+.npz
09/08/2019  11:18 PM        14,764,276 flrnonflr-test-imgs96-0.9.npz
09/09/2019  01:02 AM             8,900 flrnonflr-test-labels.npz
09/08/2019  11:18 PM             8,780 flrnonflr-test-labels128.npz
09/08/2019  11:18 PM             8,780 flrnonflr-test-labels96-0.8.npz
09/14/2019  07:39 PM            29,812 flrnonflr-test-labels96-0.8+.npz
09/08/2019  11:18 PM             4,516 flrnonflr-test-labels96-0.9.npz
09/09/2019  01:02 AM        53,133,556 flrnonflr-train-imgs.npz
09/08/2019  11:18 PM       209,584,372 flrnonflr-train-imgs128.npz
09/08/2019  11:18 PM       117,891,316 flrnonflr-train-imgs96-0.8.npz
09/14/2019  04:13 PM       408,748,276 flrnonflr-train-imgs96-0.8+.npz
09/08/2019  11:18 PM       132,627,700 flrnonflr-train-imgs96-0.9.npz
09/09/2019  01:02 AM            34,836 flrnonflr-train-labels.npz
09/08/2019  11:18 PM            34,356 flrnonflr-train-labels128.npz
09/08/2019  11:18 PM            34,356 flrnonflr-train-labels96-0.8.npz
09/14/2019  04:13 PM           118,516 flrnonflr-train-labels96-0.8+.npz
09/08/2019  11:18 PM            38,620 flrnonflr-train-labels96-0.9.npz
08/17/2019  11:53 AM           124,162 ImageNet-Flowers.txt
08/17/2019  03:54 PM            75,692 ImageNet-Fungus.txt
08/17/2019  03:57 PM            81,424 ImageNet-Rocks.txt
09/15/2019  09:58 PM            66,035 Inception-ResNet-v1 & v2.ipynb
09/15/2019  03:16 PM            58,343 Inception-v4.ipynb
09/14/2019  11:39 PM            26,103 model.pdf
09/14/2019  07:39 PM    <DIR>          npz
09/03/2019  09:40 PM           128,688 Pexels-Flowers.txt
09/03/2019  09:40 PM            28,575 Pexels-Umbrellas.txt
09/09/2019  01:02 AM        22,733,044 pxl_flr-imgs.npz
09/08/2019  11:18 PM        88,080,628 pxl_flr-imgs128.npz
09/08/2019  11:18 PM        49,545,460 pxl_flr-imgs96.npz
09/09/2019  01:02 AM         5,173,492 pxl_umb-imgs.npz
09/08/2019  11:18 PM        20,594,932 pxl_umb-imgs128.npz
09/08/2019  11:18 PM        11,584,756 pxl_umb-imgs96.npz
09/09/2019  01:02 AM        12,275,956 rck-imgs.npz
09/08/2019  11:18 PM        49,004,788 rck-imgs128.npz
09/08/2019  11:18 PM        27,565,300 rck-imgs96.npz
09/14/2019  04:01 PM    <DIR>          readings
08/22/2019  11:02 PM                44 README.md
09/14/2019  04:21 PM           417,457 Reshape_Resize_Images.ipynb
09/09/2019  12:48 AM         8,546,104 train_Neural_Network (Conv2D, 96-0.8).html
09/15/2019  10:09 PM         2,427,075 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data, try13).html
09/15/2019  02:35 AM        12,032,935 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try10).html
09/15/2019  11:36 AM         2,387,331 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try11).html
09/15/2019  05:42 PM         2,291,568 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try12).html
09/14/2019  08:36 PM         7,071,416 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp + Added data, try9).html
09/11/2019  01:01 AM         4,494,650 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try6).html
09/11/2019  10:59 PM         6,116,768 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try7).html
09/12/2019  02:35 AM         5,851,809 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try8).html
09/09/2019  03:08 AM         3,900,219 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try3).html
09/09/2019  11:09 PM         6,528,529 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try4).html
09/10/2019  08:44 PM         6,636,754 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try5).html
09/09/2019  01:32 AM         6,583,279 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try1).html
09/09/2019  02:40 AM         6,300,696 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try2).html
09/09/2019  01:23 AM         6,446,135 train_Neural_Network (ResNetV1, 96-0.8, no Dropout, try1).html
09/16/2019  12:34 AM         1,962,135 train_Neural_Network.ipynb
09/14/2019  04:08 PM        88,003,828 umbOpnImg-imgs96.npz
09/14/2019  07:39 PM         2,094,090 VGG_Model_Setup.ipynb
09/14/2019  07:39 PM            17,772 VGG_Model_Train_Test.ipynb
              75 File(s)  2,592,283,412 bytes
               6 Dir(s)  77,718,167,552 bytes free
In [5]:
# Collect image file paths per category.
# ImageNet-derived files are .jpg; Pexels files are .jpeg.
flowers = glob.glob('./data/flr_*.jpg')
fungus = glob.glob('./data/fgs_*.jpg')
rocks = glob.glob('./data/rck_*.jpg')

pixel_flowers = glob.glob('./data/pxl_flower_*.jpeg')
pixel_umbrella = glob.glob('./data/pxl_umbrella_*.jpeg')

# Label every count explicitly — the original message left the first count
# (ImageNet flowers) unlabelled, producing "There are 1269, 1792 flower ...".
print("There are %s ImageNet flower, %s Pexels flower, %s fungus, %s rock and %s umbrella pictures"
      % (len(flowers), len(pixel_flowers), len(fungus), len(rocks), len(pixel_umbrella)))
There are 1269, 1792 flower, 856 fungus, 1007 rock and 420 umbrella pictures
In [6]:
# Randomly show 5 examples of the images (the loop below runs 5 times,
# not 10 as the original comment stated).
from IPython.display import Image
    
# Switch this assignment to fungus / rocks / pixel_* to preview another set.
dataset = flowers #flowers #fungus #rocks

for i in range(0, 5):
    # Pick a random file path from the chosen set and render it inline.
    index = random.randint(0, len(dataset)-1)   
    print("Showing:", dataset[index])
    
    img = mpimg.imread(dataset[index])
    imgplot = plt.imshow(img)
    plt.show()

# Alternative: display the raw file via IPython's Image widget.
#Image(dataset[index])
Showing: ./data\flr_00235.jpg
Showing: ./data\flr_01035.jpg
Showing: ./data\flr_01267.jpg
Showing: ./data\flr_01880.jpg
Showing: ./data\flr_00595.jpg

Extract the training and testing datasets

In [7]:
# Load the pre-built train/test arrays from .npz archives
# (96x96 RGB, "0.8+" split — presumably produced by the
# Create_Train_Test_Set notebook; confirm against that notebook).
# np.savez stores a single unnamed array under the key 'arr_0'.
trDatOrg       = np.load('flrnonflr-train-imgs96-0.8+.npz')['arr_0']
trLblOrg       = np.load('flrnonflr-train-labels96-0.8+.npz')['arr_0']
tsDatOrg       = np.load('flrnonflr-test-imgs96-0.8+.npz')['arr_0']
tsLblOrg       = np.load('flrnonflr-test-labels96-0.8+.npz')['arr_0']
In [8]:
# Report the array shapes of the freshly loaded datasets.
print("For the training and test datasets:")
shapes = (trDatOrg.shape, trLblOrg.shape, tsDatOrg.shape, tsLblOrg.shape)
print("The shapes are {}, {}, {}, {}".format(*shapes))
For the training and test datasets:
The shapes are (14784, 96, 96, 3), (14784,), (3696, 96, 96, 3), (3696,)
In [9]:
# Randomly show 20 examples from the loaded test arrays (the loop below
# runs 20 times, not 10 as the original comment stated).
# Labels print as 1.0 / 0.0 — presumably 1.0 = flower, 0.0 = non-flower;
# verify against the dataset-creation notebook.

data = tsDatOrg
label = tsLblOrg

for i in range(20):
    index = random.randint(0, len(data)-1)
    print("Showing %s index image, It is %s" %(index, label[index]))
    imgplot = plt.imshow(data[index])
    plt.show()
Showing 2174 index image, It is 1.0
Showing 660 index image, It is 1.0
Showing 466 index image, It is 1.0
Showing 1901 index image, It is 1.0
Showing 3112 index image, It is 0.0
Showing 2228 index image, It is 1.0
Showing 1928 index image, It is 1.0
Showing 3228 index image, It is 0.0
Showing 1603 index image, It is 1.0
Showing 3676 index image, It is 0.0
Showing 2311 index image, It is 0.0
Showing 2076 index image, It is 1.0
Showing 1552 index image, It is 1.0
Showing 662 index image, It is 1.0
Showing 1372 index image, It is 1.0
Showing 1517 index image, It is 1.0
Showing 465 index image, It is 1.0
Showing 172 index image, It is 1.0
Showing 2226 index image, It is 1.0
Showing 752 index image, It is 1.0
In [10]:
# Convert the data into 'float32'
# Rescale the pixel values from 0~255 to 0~1
trDat       = trDatOrg.astype('float32')/255
tsDat       = tsDatOrg.astype('float32')/255

# Retrieve the row size of each image
# Retrieve the column size of each image
# (channel count is fixed to 3 — RGB, matching the (N, 96, 96, 3) arrays)
imgrows     = trDat.shape[1]
imgclms     = trDat.shape[2]
channel     = 3

# # reshape the data to be [samples][width][height][channel]
# # This is required by Keras framework
# # (left disabled: the loaded arrays are already 4-D)
# trDat       = trDat.reshape(trDat.shape[0], imgrows, imgclms, channel)
# tsDat       = tsDat.reshape(tsDat.shape[0], imgrows, imgclms, channel)

# Perform one hot encoding on the labels
# Retrieve the number of classes from the encoded width
trLbl       = to_categorical(trLblOrg)
tsLbl       = to_categorical(tsLblOrg)
num_classes = tsLbl.shape[1]
In [11]:
# fix random seed for reproducibility
seed = 29
np.random.seed(seed)


# Base name used for model artifacts (e.g. FlowerPower.hdf5 / .csv
# visible in the directory listing above).
modelname = 'FlowerPower'

# Optimizer shared by the compile() calls that reference `optmz`.
#optmz = optimizers.Adam(lr=0.001)
# NOTE(review): `lr` is deprecated in TF2 Keras in favour of
# `learning_rate` — confirm the TF version before changing.
optmz = optimizers.RMSprop(lr=0.001)
In [12]:
# Baseline Model -> func: createBaselineModel()

def createBaselineModel():
    """Build and compile a small baseline CNN.

    Two Conv2D/MaxPooling stages, dropout, then a dense softmax head
    sized by the notebook-level `num_classes`; compiled with the
    'adam' optimizer and categorical cross-entropy.
    """
    img_in = Input(shape=(imgrows, imgclms, channel))

    net = Conv2D(30, (4, 4), activation='relu')(img_in)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Conv2D(50, (4, 4), activation='relu')(net)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Dropout(0.3)(net)

    net = Flatten()(net)
    net = Dense(32, activation='relu')(net)
    net = Dense(num_classes, activation='softmax')(net)

    model = Model(inputs=[img_in], outputs=net)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
In [13]:
# ResNetV1 -> func: createResNetV1()
def resLyr(inputs,
           numFilters=16,
           kernelSz=3,
           strides=1,
           activation='relu',
           batchNorm=True,
           convFirst=True,
           lyrName=None):
    """One ResNet layer: Conv2D with optional BatchNorm and activation.

    convFirst=True gives conv -> BN -> activation; convFirst=False gives
    BN -> activation -> conv (pre-activation ordering). When lyrName is
    None all sub-layers fall back to Keras auto-naming.
    """
    def _name(suffix):
        # Derive a sub-layer name from lyrName, or None for auto-naming.
        return lyrName + suffix if lyrName else None

    conv = Conv2D(numFilters,
                  kernel_size=kernelSz,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(1e-4),
                  name=_name('_conv'))

    out = inputs
    if convFirst:
        out = conv(out)
        if batchNorm:
            out = BatchNormalization(name=_name('_bn'))(out)
        if activation is not None:
            out = Activation(activation, name=_name('_' + activation))(out)
    else:
        if batchNorm:
            out = BatchNormalization(name=_name('_bn'))(out)
        if activation is not None:
            out = Activation(activation, name=_name('_' + activation))(out)
        out = conv(out)
    return out


def resBlkV1(inputs,
             numFilters=16,
             numBlocks=3,
             downsampleOnFirst=True,
             names=None):
    """Stack of `numBlocks` residual units (two resLyr each) with add +
    ReLU merges. If downsampleOnFirst, the first unit uses stride 2 and
    a 1x1 linear conv to project the shortcut to the new shape.
    """
    x = inputs
    for run in range(0,numBlocks):
        strides = 1
        blkStr = str(run+1)
        # Only the first unit of a downsampling stage halves the
        # spatial resolution.
        if downsampleOnFirst and run == 0:
            strides = 2
        y = resLyr(inputs=x, numFilters=numFilters, strides=strides,
                   lyrName=names+'_Blk'+blkStr+'_Res1' if names else None)
        # Second conv has no activation: ReLU is applied after the add.
        y = resLyr(inputs=y, numFilters=numFilters, activation=None,
                   lyrName=names+'_Blk'+blkStr+'_Res2' if names else None)
        if downsampleOnFirst and run == 0:
            # Linear 1x1 projection so the shortcut matches y's shape.
            x = resLyr(inputs=x, numFilters=numFilters, kernelSz=1,
                       strides=strides, activation=None, batchNorm=False,
                       lyrName=names+'_Blk'+blkStr+'_lin' if names else None)
        x = add([x,y], name=names+'_Blk'+blkStr+'_add' if names else None)
        x = Activation('relu', name=names+'_Blk'+blkStr+'_relu' if names else None)(x)
    return x

def createResNetV1(inputShape=(imgrows, imgclms, channel),
                   numClasses=2):
    """Assemble and compile the ResNet-V1 style classifier.

    Six residual stages of 3 blocks each with per-stage dropout, then
    average pooling and a softmax head; compiled with the notebook-level
    `optmz` optimizer and categorical cross-entropy.
    """
    # (filters, downsample on first block, stage name, dropout rate)
    stageCfg = [(16,  False, 'Stg1', 0.30),
                (32,  True,  'Stg2', 0.40),
                (64,  True,  'Stg3', 0.50),
                (128, True,  'Stg4', 0.50),
                (128, False, 'Stg5', 0.50),
                (256, True,  'Stg6', 0.50)]

    inputs = Input(shape=inputShape)
    x = resLyr(inputs, lyrName='Inpt')
    for filters, downsample, stageName, dropRate in stageCfg:
        x = resBlkV1(inputs=x, numFilters=filters, numBlocks=3,
                     downsampleOnFirst=downsample, names=stageName)
        x = Dropout(dropRate)(x)

    x = AveragePooling2D(pool_size=6, name='AvgPool')(x)
    x = Flatten()(x)
    outputs = Dense(numClasses, activation='softmax',
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optmz,
                  metrics=['accuracy'])
    return model
In [14]:
# Mostly Original # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    """Inception-v4 stem: three conv/pool stages joined by channel-wise
    concatenation, progressively downsampling the input.

    `names` is accepted for interface symmetry with the other block
    builders but is currently unused. (A dead `x = inputs` assignment
    was removed — the first conv reads `inputs` directly.)
    """
    # Stage 1: initial convs, then parallel max-pool / strided conv.
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    # Stage 2: parallel 1x1->3x3 and 1x1->7x1->1x7->3x3 towers.
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    # Stage 3: strided conv (zero-padded to match the pooled branch) vs max-pool.
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    """Inception-A block: four parallel branches concatenated on the
    channel axis. `names` is accepted but currently unused.
    """
    x = inputs
    
    # Branch 1: 1x1 average pool (spatially a no-op) + 1x1 projection.
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    # Branch 2: plain 1x1 conv.
    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # Branch 3: 1x1 -> 3x3.
    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    # Branch 4: 1x1 -> 3x3 -> 3x3.
    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    """Inception-B block: avg-pool projection, 1x1 conv, and two
    factorised 7-tap conv towers, concatenated channel-wise.
    `names` is accepted but currently unused.
    """
    x = inputs
    
    # Branch 1: 1x1 average pool (spatially a no-op) + 1x1 projection.
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    # Branch 2: plain 1x1 conv.
    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # Branch 3: 1x1 -> 1x7 -> 1x7.
    # NOTE(review): the Inception-v4 paper uses 1x7 then 7x1 here —
    # confirm the second (1, 7) kernel is intentional.
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    # Branch 4: 1x1 followed by alternating 1x7 / 7x1 convolutions.
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    """Inception-C block: branches 3 and 4 each split into a (1,3) and a
    (3,1) head, so six tensors are concatenated channel-wise.
    `names` is accepted but currently unused.
    """
    x = inputs
    
    # Branch 1: 1x1 average pool (spatially a no-op) + 1x1 projection.
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    # Branch 2: plain 1x1 conv.
    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # Branch 3: 1x1, then split into (1,3) and (3,1) heads.
    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    # Branch 4: 1x1 -> (1,3) -> (3,1), then split into two heads.
    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A block: halves spatial resolution via three parallel
    stride-2 branches (max-pool, 3x3 conv, 1x1->3x3->3x3 tower).

    The branch filter counts (k, l, m, n) depend on the network variant.
    `names` is accepted but currently unused.

    Raises:
        ValueError: if network_selected is not one of the known variants.
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
    else:
        # Previously an unknown variant fell through, leaving k/l/m/n
        # unbound and raising a confusing NameError; fail fast instead.
        raise ValueError("Unknown network_selected: %r" % (network_selected,))
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):
    """Reduction-B block: halves spatial resolution via three parallel
    stride-2 branches (max-pool, 1x1->3x3, 1x1->1x7->7x1->3x3).
    `names` is accepted but currently unused.
    """

        
    x = inputs
    
    # Branch 1: stride-2 max pool.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    # Branch 2: 1x1 projection then stride-2 3x3 conv.
    x_M_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    # Branch 3: 1x1 -> 1x7 -> 7x1 -> stride-2 3x3.
    x_R_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Assemble and compile the (mostly original) Inception-v4 model:
    stem -> 4x Inception-A -> Reduction-A -> 7x Inception-B ->
    Reduction-B -> 3x Inception-C -> pool/flatten/dense head.

    NOTE: later cells redefine this function and the block builders it
    calls with modified variants; whichever cell ran last wins.
    """
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = reduction_b_block(x)
    
    x = inception_c_block(x)
    x = inception_c_block(x)
    x = inception_c_block(x)
    
    # 1x1 pooling is spatially a no-op; per the author's note it replaces
    # the paper's larger pool, which underflowed at this input size.
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1536)(x) # Changed; no activation given, so this layer is linear
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [15]:
# Modified2 # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    """Inception-v4 stem (Modified2 cell). Redefines the stem_block from
    the previous cell with an identical body — whichever cell ran last
    wins. `names` is accepted but currently unused.
    """
    x = inputs  # note: immediately rebound by the first conv below
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    """Inception-A block (Modified2 cell). Redefines the version from the
    previous cell with an identical body. `names` is currently unused.
    """
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    # Concatenate the four parallel branches channel-wise.
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    """Inception-B block (Modified2 cell). Redefines the version from the
    previous cell with an identical body. `names` is currently unused.
    """
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # NOTE(review): two successive (1, 7) kernels — the paper uses
    # 1x7 then 7x1; confirm this is intentional.
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    """Inception-C block (Modified2 cell). Redefines the version from the
    previous cell with an identical body. `names` is currently unused.
    """
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # Branch splits into (1,3) and (3,1) heads after the 1x1 conv.
    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    # Six tensors total are concatenated channel-wise.
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A block (Modified2 cell): halves spatial resolution via
    three parallel stride-2 branches. Filter counts (k, l, m, n) depend
    on the network variant. `names` is accepted but currently unused.

    Raises:
        ValueError: if network_selected is not one of the known variants.
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
    else:
        # Previously an unknown variant fell through, leaving k/l/m/n
        # unbound and raising a confusing NameError; fail fast instead.
        raise ValueError("Unknown network_selected: %r" % (network_selected,))
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):
    """Reduction-B block (Modified2 cell). Redefines the version from the
    previous cell with an identical body. `names` is currently unused.
    """

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    # Concatenate the three stride-2 branches channel-wise.
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Assemble and compile the Modified2 Inception-v4 model.

    Same topology as the original variant, but every Inception/Reduction
    block is preceded by a BatchNormalization layer. The loops below
    build exactly the original sequence: 4x A, Reduction-A, 7x B,
    Reduction-B, 3x C, then the pooling/dense head.
    """
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)

    x = stem_block(inputs)
    for _ in range(4):
        x = BatchNormalization()(x)
        x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)

    for _ in range(7):
        x = BatchNormalization()(x)
        x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)

    for _ in range(3):
        x = BatchNormalization()(x)
        x = inception_c_block(x)

    # 1x1 pooling kept from the original author to avoid a
    # negative-dimension error at this input size (spatially a no-op).
    x = AveragePooling2D(pool_size=(1,1))(x)
    x = Flatten()(x)
    x = Dense(1536)(x)  # no activation given, so this layer is linear
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax',
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer="Adam",
                  metrics=['accuracy'])
    return model
In [16]:
# Modified #(halfed) # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    """Inception-v4 stem, halved variant: same topology as the earlier
    stem_block definitions but with roughly half the filters per conv.
    Redefines the previous cells' version — last-run cell wins.
    `names` is accepted but currently unused.
    """
    x = inputs  # note: immediately rebound by the first conv below
    
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=48, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=32, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=32, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    """Inception-A block, halved variant (filters roughly half of the
    earlier definitions). Redefines the previous cells' version.
    `names` is currently unused.
    """
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    # Concatenate the four parallel branches channel-wise.
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=112, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=128, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 96, 112, 128, 192
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 96, 96, 128, 192
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 128, 128, 192, 192
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=160, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=160, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)
    
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(256)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer=optmz, 
                  metrics=['accuracy'])
    return model 
In [17]:
# Mostly Original # Inception-Res-v2 -> func: create_inception_resnet_v2()
def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs
    
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)
    
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=160, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=192, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1152, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=2048, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):   
    x = inputs
    
    x_EL_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_ML_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ML_2 = Conv2D(filters=384, kernel_size=(3, 3), strides=2, padding='valid')(x_ML_1)
    
    x_MR_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_MR_1)
    
    x_ER_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')(x_ER_1)
    x_ER_3 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_ER_2)
    
    x = concatenate([x_EL_1, x_ML_2, x_MR_2, x_ER_3])
    return x

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_reduction_b_block(x)
    
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1792)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [18]:
# Modified # Inception-Res-v2 -> func: create_inception_resnet_v2()
def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs
    
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)
    
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=160, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=192, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1152, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=2048, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):   
    x = inputs
    
    x_EL_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_ML_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ML_2 = Conv2D(filters=384, kernel_size=(3, 3), strides=2, padding='valid')(x_ML_1)
    
    x_MR_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_MR_1)
    
    x_ER_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')(x_ER_1)
    x_ER_3 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_ER_2)
    
    x = concatenate([x_EL_1, x_ML_2, x_MR_2, x_ER_3])
    return x

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_reduction_b_block(x)
    
    x = BatchNormalization()(x)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = BatchNormalization()(x)
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1792)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [19]:
# Setup the models
model       = create_inception_v4() # This is meant for training
modelGo     = create_inception_v4() # This is used for final testing

model.summary()
WARNING:tensorflow:From D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\initializers.py:104: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with distribution=normal is deprecated and will be removed in a future version.
Instructions for updating:
`normal` is a deprecated alias for `truncated_normal`
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 96, 96, 3)    0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 47, 47, 32)   896         input_1[0][0]                    
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 45, 45, 32)   9248        conv2d[0][0]                     
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 45, 45, 64)   18496       conv2d_1[0][0]                   
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 22, 22, 64)   0           conv2d_2[0][0]                   
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 22, 22, 96)   55392       conv2d_2[0][0]                   
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 22, 22, 160)  0           max_pooling2d[0][0]              
                                                                 conv2d_3[0][0]                   
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_6[0][0]                   
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_7[0][0]                   
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_4[0][0]                   
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_8[0][0]                   
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 20, 20, 192)  0           conv2d_5[0][0]                   
                                                                 conv2d_9[0][0]                   
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 9, 9, 192)    331968      concatenate_1[0][0]              
__________________________________________________________________________________________________
zero_padding2d (ZeroPadding2D)  (None, 10, 10, 192)  0           conv2d_10[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 10, 10, 192)  0           concatenate_1[0][0]              
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 10, 10, 384)  0           zero_padding2d[0][0]             
                                                                 max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 10, 10, 384)  1536        concatenate_2[0][0]              
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
average_pooling2d (AveragePooli (None, 10, 10, 384)  0           batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_15[0][0]                  
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 10, 10, 48)   18480       average_pooling2d[0][0]          
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 10, 10, 48)   18480       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_13[0][0]                  
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_16[0][0]                  
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 10, 10, 192)  0           conv2d_11[0][0]                  
                                                                 conv2d_12[0][0]                  
                                                                 conv2d_14[0][0]                  
                                                                 conv2d_17[0][0]                  
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 10, 10, 192)  768         concatenate_3[0][0]              
__________________________________________________________________________________________________
conv2d_22 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 10, 10, 192)  0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_20 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_23 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_22[0][0]                  
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 10, 10, 48)   9264        average_pooling2d_1[0][0]        
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 10, 10, 48)   9264        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_21 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_20[0][0]                  
__________________________________________________________________________________________________
conv2d_24 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_23[0][0]                  
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 10, 10, 192)  0           conv2d_18[0][0]                  
                                                                 conv2d_19[0][0]                  
                                                                 conv2d_21[0][0]                  
                                                                 conv2d_24[0][0]                  
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 10, 10, 192)  768         concatenate_4[0][0]              
__________________________________________________________________________________________________
conv2d_29 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
average_pooling2d_2 (AveragePoo (None, 10, 10, 192)  0           batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_27 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_30 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_29[0][0]                  
__________________________________________________________________________________________________
conv2d_25 (Conv2D)              (None, 10, 10, 48)   9264        average_pooling2d_2[0][0]        
__________________________________________________________________________________________________
conv2d_26 (Conv2D)              (None, 10, 10, 48)   9264        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_28 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_27[0][0]                  
__________________________________________________________________________________________________
conv2d_31 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_30[0][0]                  
__________________________________________________________________________________________________
concatenate_5 (Concatenate)     (None, 10, 10, 192)  0           conv2d_25[0][0]                  
                                                                 conv2d_26[0][0]                  
                                                                 conv2d_28[0][0]                  
                                                                 conv2d_31[0][0]                  
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 10, 10, 192)  768         concatenate_5[0][0]              
__________________________________________________________________________________________________
conv2d_36 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_3[0][0]      
__________________________________________________________________________________________________
average_pooling2d_3 (AveragePoo (None, 10, 10, 192)  0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_34 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_37 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_36[0][0]                  
__________________________________________________________________________________________________
conv2d_32 (Conv2D)              (None, 10, 10, 48)   9264        average_pooling2d_3[0][0]        
__________________________________________________________________________________________________
conv2d_33 (Conv2D)              (None, 10, 10, 48)   9264        batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_35 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_34[0][0]                  
__________________________________________________________________________________________________
conv2d_38 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_37[0][0]                  
__________________________________________________________________________________________________
concatenate_6 (Concatenate)     (None, 10, 10, 192)  0           conv2d_32[0][0]                  
                                                                 conv2d_33[0][0]                  
                                                                 conv2d_35[0][0]                  
                                                                 conv2d_38[0][0]                  
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 10, 10, 192)  768         concatenate_6[0][0]              
__________________________________________________________________________________________________
conv2d_40 (Conv2D)              (None, 10, 10, 192)  37056       batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_41 (Conv2D)              (None, 10, 10, 224)  387296      conv2d_40[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 4, 4, 192)    0           batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_39 (Conv2D)              (None, 4, 4, 384)    663936      batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_42 (Conv2D)              (None, 4, 4, 256)    516352      conv2d_41[0][0]                  
__________________________________________________________________________________________________
concatenate_7 (Concatenate)     (None, 4, 4, 832)    0           max_pooling2d_2[0][0]            
                                                                 conv2d_39[0][0]                  
                                                                 conv2d_42[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 4, 4, 832)    3328        concatenate_7[0][0]              
__________________________________________________________________________________________________
conv2d_48 (Conv2D)              (None, 4, 4, 96)     79968       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_49 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_48[0][0]                  
__________________________________________________________________________________________________
conv2d_45 (Conv2D)              (None, 4, 4, 96)     79968       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_50 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_49[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_4 (AveragePoo (None, 4, 4, 832)    0           batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_46 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_45[0][0]                  
__________________________________________________________________________________________________
conv2d_51 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_50[0][0]                  
__________________________________________________________________________________________________
conv2d_43 (Conv2D)              (None, 4, 4, 64)     53312       average_pooling2d_4[0][0]        
__________________________________________________________________________________________________
conv2d_44 (Conv2D)              (None, 4, 4, 192)    159936      batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_47 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_46[0][0]                  
__________________________________________________________________________________________________
conv2d_52 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_51[0][0]                  
__________________________________________________________________________________________________
concatenate_8 (Concatenate)     (None, 4, 4, 512)    0           conv2d_43[0][0]                  
                                                                 conv2d_44[0][0]                  
                                                                 conv2d_47[0][0]                  
                                                                 conv2d_52[0][0]                  
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 4, 4, 512)    2048        concatenate_8[0][0]              
__________________________________________________________________________________________________
conv2d_58 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_59 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_58[0][0]                  
__________________________________________________________________________________________________
conv2d_55 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_60 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_59[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_5 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_56 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_55[0][0]                  
__________________________________________________________________________________________________
conv2d_61 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_60[0][0]                  
__________________________________________________________________________________________________
conv2d_53 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_5[0][0]        
__________________________________________________________________________________________________
conv2d_54 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_57 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_56[0][0]                  
__________________________________________________________________________________________________
conv2d_62 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_61[0][0]                  
__________________________________________________________________________________________________
concatenate_9 (Concatenate)     (None, 4, 4, 512)    0           conv2d_53[0][0]                  
                                                                 conv2d_54[0][0]                  
                                                                 conv2d_57[0][0]                  
                                                                 conv2d_62[0][0]                  
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 4, 4, 512)    2048        concatenate_9[0][0]              
__________________________________________________________________________________________________
conv2d_68 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_69 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_68[0][0]                  
__________________________________________________________________________________________________
conv2d_65 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_70 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_69[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_6 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_66 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_65[0][0]                  
__________________________________________________________________________________________________
conv2d_71 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_70[0][0]                  
__________________________________________________________________________________________________
conv2d_63 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_6[0][0]        
__________________________________________________________________________________________________
conv2d_64 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_67 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_66[0][0]                  
__________________________________________________________________________________________________
conv2d_72 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_71[0][0]                  
__________________________________________________________________________________________________
concatenate_10 (Concatenate)    (None, 4, 4, 512)    0           conv2d_63[0][0]                  
                                                                 conv2d_64[0][0]                  
                                                                 conv2d_67[0][0]                  
                                                                 conv2d_72[0][0]                  
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 4, 4, 512)    2048        concatenate_10[0][0]             
__________________________________________________________________________________________________
conv2d_78 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_79 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_78[0][0]                  
__________________________________________________________________________________________________
conv2d_75 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_80 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_79[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_7 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_76 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_75[0][0]                  
__________________________________________________________________________________________________
conv2d_81 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_80[0][0]                  
__________________________________________________________________________________________________
conv2d_73 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_7[0][0]        
__________________________________________________________________________________________________
conv2d_74 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_77 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_76[0][0]                  
__________________________________________________________________________________________________
conv2d_82 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_81[0][0]                  
__________________________________________________________________________________________________
concatenate_11 (Concatenate)    (None, 4, 4, 512)    0           conv2d_73[0][0]                  
                                                                 conv2d_74[0][0]                  
                                                                 conv2d_77[0][0]                  
                                                                 conv2d_82[0][0]                  
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 4, 4, 512)    2048        concatenate_11[0][0]             
__________________________________________________________________________________________________
conv2d_88 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_89 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_88[0][0]                  
__________________________________________________________________________________________________
conv2d_85 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_90 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_89[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_8 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_86 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_85[0][0]                  
__________________________________________________________________________________________________
conv2d_91 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_90[0][0]                  
__________________________________________________________________________________________________
conv2d_83 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_8[0][0]        
__________________________________________________________________________________________________
conv2d_84 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_87 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_86[0][0]                  
__________________________________________________________________________________________________
conv2d_92 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_91[0][0]                  
__________________________________________________________________________________________________
concatenate_12 (Concatenate)    (None, 4, 4, 512)    0           conv2d_83[0][0]                  
                                                                 conv2d_84[0][0]                  
                                                                 conv2d_87[0][0]                  
                                                                 conv2d_92[0][0]                  
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 4, 4, 512)    2048        concatenate_12[0][0]             
__________________________________________________________________________________________________
conv2d_98 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_99 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_98[0][0]                  
__________________________________________________________________________________________________
conv2d_95 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_100 (Conv2D)             (None, 4, 4, 112)    75376       conv2d_99[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_9 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_96 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_95[0][0]                  
__________________________________________________________________________________________________
conv2d_101 (Conv2D)             (None, 4, 4, 112)    87920       conv2d_100[0][0]                 
__________________________________________________________________________________________________
conv2d_93 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_9[0][0]        
__________________________________________________________________________________________________
conv2d_94 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_97 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_96[0][0]                  
__________________________________________________________________________________________________
conv2d_102 (Conv2D)             (None, 4, 4, 128)    100480      conv2d_101[0][0]                 
__________________________________________________________________________________________________
concatenate_13 (Concatenate)    (None, 4, 4, 512)    0           conv2d_93[0][0]                  
                                                                 conv2d_94[0][0]                  
                                                                 conv2d_97[0][0]                  
                                                                 conv2d_102[0][0]                 
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 4, 4, 512)    2048        concatenate_13[0][0]             
__________________________________________________________________________________________________
conv2d_108 (Conv2D)             (None, 4, 4, 96)     49248       batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_109 (Conv2D)             (None, 4, 4, 96)     64608       conv2d_108[0][0]                 
__________________________________________________________________________________________________
conv2d_105 (Conv2D)             (None, 4, 4, 96)     49248       batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_110 (Conv2D)             (None, 4, 4, 112)    75376       conv2d_109[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_10 (AveragePo (None, 4, 4, 512)    0           batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_106 (Conv2D)             (None, 4, 4, 112)    75376       conv2d_105[0][0]                 
__________________________________________________________________________________________________
conv2d_111 (Conv2D)             (None, 4, 4, 112)    87920       conv2d_110[0][0]                 
__________________________________________________________________________________________________
conv2d_103 (Conv2D)             (None, 4, 4, 64)     32832       average_pooling2d_10[0][0]       
__________________________________________________________________________________________________
conv2d_104 (Conv2D)             (None, 4, 4, 192)    98496       batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_107 (Conv2D)             (None, 4, 4, 128)    100480      conv2d_106[0][0]                 
__________________________________________________________________________________________________
conv2d_112 (Conv2D)             (None, 4, 4, 128)    100480      conv2d_111[0][0]                 
__________________________________________________________________________________________________
concatenate_14 (Concatenate)    (None, 4, 4, 512)    0           conv2d_103[0][0]                 
                                                                 conv2d_104[0][0]                 
                                                                 conv2d_107[0][0]                 
                                                                 conv2d_112[0][0]                 
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 4, 4, 512)    2048        concatenate_14[0][0]             
__________________________________________________________________________________________________
conv2d_115 (Conv2D)             (None, 4, 4, 128)    65664       batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_116 (Conv2D)             (None, 4, 4, 128)    114816      conv2d_115[0][0]                 
__________________________________________________________________________________________________
conv2d_113 (Conv2D)             (None, 4, 4, 96)     49248       batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_117 (Conv2D)             (None, 4, 4, 160)    143520      conv2d_116[0][0]                 
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 1, 1, 512)    0           batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_114 (Conv2D)             (None, 1, 1, 96)     83040       conv2d_113[0][0]                 
__________________________________________________________________________________________________
conv2d_118 (Conv2D)             (None, 1, 1, 160)    230560      conv2d_117[0][0]                 
__________________________________________________________________________________________________
concatenate_15 (Concatenate)    (None, 1, 1, 768)    0           max_pooling2d_3[0][0]            
                                                                 conv2d_114[0][0]                 
                                                                 conv2d_118[0][0]                 
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 1, 1, 768)    3072        concatenate_15[0][0]             
__________________________________________________________________________________________________
conv2d_124 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_125 (Conv2D)             (None, 1, 1, 224)    129248      conv2d_124[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_11 (AveragePo (None, 1, 1, 768)    0           batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_121 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_126 (Conv2D)             (None, 1, 1, 256)    172288      conv2d_125[0][0]                 
__________________________________________________________________________________________________
conv2d_119 (Conv2D)             (None, 1, 1, 128)    98432       average_pooling2d_11[0][0]       
__________________________________________________________________________________________________
conv2d_120 (Conv2D)             (None, 1, 1, 128)    98432       batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_122 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_121[0][0]                 
__________________________________________________________________________________________________
conv2d_123 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_121[0][0]                 
__________________________________________________________________________________________________
conv2d_127 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_126[0][0]                 
__________________________________________________________________________________________________
conv2d_128 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_126[0][0]                 
__________________________________________________________________________________________________
concatenate_16 (Concatenate)    (None, 1, 1, 768)    0           conv2d_119[0][0]                 
                                                                 conv2d_120[0][0]                 
                                                                 conv2d_122[0][0]                 
                                                                 conv2d_123[0][0]                 
                                                                 conv2d_127[0][0]                 
                                                                 conv2d_128[0][0]                 
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 1, 1, 768)    3072        concatenate_16[0][0]             
__________________________________________________________________________________________________
conv2d_134 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_135 (Conv2D)             (None, 1, 1, 224)    129248      conv2d_134[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_12 (AveragePo (None, 1, 1, 768)    0           batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_131 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_136 (Conv2D)             (None, 1, 1, 256)    172288      conv2d_135[0][0]                 
__________________________________________________________________________________________________
conv2d_129 (Conv2D)             (None, 1, 1, 128)    98432       average_pooling2d_12[0][0]       
__________________________________________________________________________________________________
conv2d_130 (Conv2D)             (None, 1, 1, 128)    98432       batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_132 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_131[0][0]                 
__________________________________________________________________________________________________
conv2d_133 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_131[0][0]                 
__________________________________________________________________________________________________
conv2d_137 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_136[0][0]                 
__________________________________________________________________________________________________
conv2d_138 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_136[0][0]                 
__________________________________________________________________________________________________
concatenate_17 (Concatenate)    (None, 1, 1, 768)    0           conv2d_129[0][0]                 
                                                                 conv2d_130[0][0]                 
                                                                 conv2d_132[0][0]                 
                                                                 conv2d_133[0][0]                 
                                                                 conv2d_137[0][0]                 
                                                                 conv2d_138[0][0]                 
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 1, 1, 768)    3072        concatenate_17[0][0]             
__________________________________________________________________________________________________
conv2d_144 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_145 (Conv2D)             (None, 1, 1, 224)    129248      conv2d_144[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_13 (AveragePo (None, 1, 1, 768)    0           batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_141 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_146 (Conv2D)             (None, 1, 1, 256)    172288      conv2d_145[0][0]                 
__________________________________________________________________________________________________
conv2d_139 (Conv2D)             (None, 1, 1, 128)    98432       average_pooling2d_13[0][0]       
__________________________________________________________________________________________________
conv2d_140 (Conv2D)             (None, 1, 1, 128)    98432       batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_142 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_141[0][0]                 
__________________________________________________________________________________________________
conv2d_143 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_141[0][0]                 
__________________________________________________________________________________________________
conv2d_147 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_146[0][0]                 
__________________________________________________________________________________________________
conv2d_148 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_146[0][0]                 
__________________________________________________________________________________________________
concatenate_18 (Concatenate)    (None, 1, 1, 768)    0           conv2d_139[0][0]                 
                                                                 conv2d_140[0][0]                 
                                                                 conv2d_142[0][0]                 
                                                                 conv2d_143[0][0]                 
                                                                 conv2d_147[0][0]                 
                                                                 conv2d_148[0][0]                 
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 1, 1, 768)    3072        concatenate_18[0][0]             
__________________________________________________________________________________________________
average_pooling2d_14 (AveragePo (None, 1, 1, 768)    0           batch_normalization_16[0][0]     
__________________________________________________________________________________________________
flatten (Flatten)               (None, 768)          0           average_pooling2d_14[0][0]       
__________________________________________________________________________________________________
dense (Dense)                   (None, 256)          196864      flatten[0][0]                    
__________________________________________________________________________________________________
dropout (Dropout)               (None, 256)          0           dense[0][0]                      
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 2)            514         dropout[0][0]                    
==================================================================================================
Total params: 12,173,266
Trainable params: 12,155,986
Non-trainable params: 17,280
__________________________________________________________________________________________________
In [20]:
# Create checkpoint for the training
# This checkpoint performs model saving when
# an epoch gives highest testing accuracy
# filepath        = modelname + ".hdf5"
# checkpoint      = ModelCheckpoint(filepath, 
#                                   monitor='val_acc', 
#                                   verbose=0, 
#                                   save_best_only=True, 
#                                   mode='max')

#                             # Log the epoch detail into csv
# csv_logger      = CSVLogger(modelname +'.csv')
# callbacks_list  = [checkpoint, csv_logger]

def lrSchedule(epoch):
    """Piecewise-constant learning-rate schedule for LearningRateScheduler.

    Starts at 1e-3 and decays at fixed epoch milestones:
    >150 -> 1e-4, >200 -> 1e-5, >240 -> 1e-6, >270 -> 5e-7.
    Prints the rate each epoch so it shows up in the training log.

    Parameters
    ----------
    epoch : int
        Zero-based epoch index supplied by Keras.

    Returns
    -------
    float
        Learning rate to use for this epoch.
    """
    lr = 1e-3

    # (threshold, multiplier) pairs, highest threshold first; only the
    # first matching entry applies — mirrors the original elif ladder.
    for threshold, factor in ((270, 0.5e-3), (240, 1e-3), (200, 1e-2), (150, 1e-1)):
        if epoch > threshold:
            lr *= factor
            break

    print('Learning rate: ', lr)

    return lr

# Wrap the schedule function so Keras applies it at the start of each epoch.
LRScheduler     = LearningRateScheduler(lrSchedule)

                            # Create checkpoint for the training
                            # This checkpoint performs model saving when
                            # an epoch gives highest testing accuracy
# NOTE(review): `modelname` is defined in an earlier cell — confirm it is set
# before this cell runs on a fresh kernel.
filepath        = modelname + ".hdf5"
checkpoint      = ModelCheckpoint(filepath,
                                  monitor='val_acc',  # 'val_acc' matches this TF version's metric name (see training log)
                                  verbose=0,
                                  save_best_only=True,  # keep only the best-val_acc weights
                                  mode='max')

                            # Log the epoch detail into csv
csv_logger      = CSVLogger(modelname +'.csv')
# Passed to fit_generator in the next cell: checkpointing, CSV logging, LR decay.
callbacks_list  = [checkpoint, csv_logger, LRScheduler]
In [21]:
# Fit the model
# This is where the training starts
# model.fit(trDat, 
#           trLbl, 
#           validation_data=(tsDat, tsLbl), 
#           epochs=60, 
#           batch_size=32,
#           callbacks=callbacks_list)

# Single source of truth for the mini-batch size (was hard-coded as 16 in
# both the generator and steps_per_epoch).
batch_size = 16

# On-the-fly training augmentation: random shifts, rotation, zoom and
# horizontal flips. Validation data (tsDat) is fed un-augmented below.
datagen = ImageDataGenerator(width_shift_range=0.25,
                             height_shift_range=0.25,
                             rotation_range=45,
                             zoom_range=0.8,
                             #zca_epsilon=1e-6,
                             #zca_whitening=True,
                             fill_mode='nearest',
                             horizontal_flip=True,
                             vertical_flip=False)

# steps_per_epoch must be an integer: len(trDat)/batch_size is a float and
# newer Keras releases reject it. math.ceil covers a partial final batch
# (math is imported in the notebook's first cell).
model.fit_generator(datagen.flow(trDat, trLbl, batch_size=batch_size),
                    validation_data=(tsDat, tsLbl),
                    epochs=300,
                    verbose=1,
                    steps_per_epoch=math.ceil(len(trDat) / batch_size),
                    callbacks=callbacks_list)
Learning rate:  0.001
Epoch 1/300
924/924 [==============================] - 154s 167ms/step - loss: 1.0476 - acc: 0.6608 - val_loss: 1.5867 - val_acc: 0.6783
Learning rate:  0.001
Epoch 2/300
924/924 [==============================] - 117s 126ms/step - loss: 0.5254 - acc: 0.7504 - val_loss: 1.0689 - val_acc: 0.7018
Learning rate:  0.001
Epoch 3/300
924/924 [==============================] - 118s 128ms/step - loss: 0.5005 - acc: 0.7725 - val_loss: 0.6034 - val_acc: 0.7776
Learning rate:  0.001
Epoch 4/300
924/924 [==============================] - 118s 128ms/step - loss: 0.4893 - acc: 0.7773 - val_loss: 0.4706 - val_acc: 0.7649
Learning rate:  0.001
Epoch 5/300
924/924 [==============================] - 119s 128ms/step - loss: 0.4818 - acc: 0.7869 - val_loss: 0.4580 - val_acc: 0.7873
Learning rate:  0.001
Epoch 6/300
924/924 [==============================] - 120s 130ms/step - loss: 0.4736 - acc: 0.7913 - val_loss: 0.4802 - val_acc: 0.8260
Learning rate:  0.001
Epoch 7/300
924/924 [==============================] - 119s 128ms/step - loss: 0.4566 - acc: 0.7966 - val_loss: 0.4088 - val_acc: 0.8439
Learning rate:  0.001
Epoch 8/300
924/924 [==============================] - 119s 128ms/step - loss: 0.4522 - acc: 0.8055 - val_loss: 0.3898 - val_acc: 0.8385
Learning rate:  0.001
Epoch 9/300
924/924 [==============================] - 118s 128ms/step - loss: 0.4492 - acc: 0.8038 - val_loss: 0.4795 - val_acc: 0.7784
Learning rate:  0.001
Epoch 10/300
924/924 [==============================] - 119s 129ms/step - loss: 0.4391 - acc: 0.8069 - val_loss: 0.3823 - val_acc: 0.8396
Learning rate:  0.001
Epoch 11/300
924/924 [==============================] - 118s 128ms/step - loss: 0.4415 - acc: 0.8103 - val_loss: 0.4235 - val_acc: 0.8079
Learning rate:  0.001
Epoch 12/300
924/924 [==============================] - 118s 127ms/step - loss: 0.4384 - acc: 0.8081 - val_loss: 0.4234 - val_acc: 0.8057
Learning rate:  0.001
Epoch 13/300
924/924 [==============================] - 117s 126ms/step - loss: 0.4347 - acc: 0.8103 - val_loss: 0.3641 - val_acc: 0.8477
Learning rate:  0.001
Epoch 14/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4349 - acc: 0.8051 - val_loss: 0.3996 - val_acc: 0.8452
Learning rate:  0.001
Epoch 15/300
924/924 [==============================] - 119s 129ms/step - loss: 0.4328 - acc: 0.8080 - val_loss: 0.3754 - val_acc: 0.8369
Learning rate:  0.001
Epoch 16/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4292 - acc: 0.8136 - val_loss: 0.4585 - val_acc: 0.7895
Learning rate:  0.001
Epoch 17/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4234 - acc: 0.8118 - val_loss: 0.4059 - val_acc: 0.8201
Learning rate:  0.001
Epoch 18/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4276 - acc: 0.8110 - val_loss: 0.3695 - val_acc: 0.8417
Learning rate:  0.001
Epoch 19/300
924/924 [==============================] - 117s 126ms/step - loss: 0.4254 - acc: 0.8132 - val_loss: 0.3586 - val_acc: 0.8558
Learning rate:  0.001
Epoch 20/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4246 - acc: 0.8118 - val_loss: 0.3441 - val_acc: 0.8558
Learning rate:  0.001
Epoch 21/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4149 - acc: 0.8187 - val_loss: 0.3714 - val_acc: 0.8498
Learning rate:  0.001
Epoch 22/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4174 - acc: 0.8157 - val_loss: 0.3783 - val_acc: 0.8271
Learning rate:  0.001
Epoch 23/300
924/924 [==============================] - 117s 127ms/step - loss: 0.4185 - acc: 0.8152 - val_loss: 0.3999 - val_acc: 0.8193
Learning rate:  0.001
Epoch 24/300
924/924 [==============================] - 117s 127ms/step - loss: 0.4172 - acc: 0.8168 - val_loss: 0.3786 - val_acc: 0.8423
Learning rate:  0.001
Epoch 25/300
924/924 [==============================] - 117s 126ms/step - loss: 0.4211 - acc: 0.8164 - val_loss: 0.3582 - val_acc: 0.8596
Learning rate:  0.001
Epoch 26/300
924/924 [==============================] - 119s 129ms/step - loss: 0.4163 - acc: 0.8172 - val_loss: 0.3566 - val_acc: 0.8563
Learning rate:  0.001
Epoch 27/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4124 - acc: 0.8143 - val_loss: 0.3597 - val_acc: 0.8561
Learning rate:  0.001
Epoch 28/300
924/924 [==============================] - 117s 126ms/step - loss: 0.4123 - acc: 0.8174 - val_loss: 0.3474 - val_acc: 0.8604
Learning rate:  0.001
Epoch 29/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4119 - acc: 0.8205 - val_loss: 0.3544 - val_acc: 0.8485
Learning rate:  0.001
Epoch 30/300
924/924 [==============================] - 118s 127ms/step - loss: 0.4125 - acc: 0.8195 - val_loss: 0.3415 - val_acc: 0.8617
Learning rate:  0.001
Epoch 31/300
924/924 [==============================] - 119s 128ms/step - loss: 0.4078 - acc: 0.8231 - val_loss: 0.3343 - val_acc: 0.8650
Learning rate:  0.001
Epoch 32/300
924/924 [==============================] - 118s 128ms/step - loss: 0.4082 - acc: 0.8235 - val_loss: 0.4181 - val_acc: 0.7957
Learning rate:  0.001
Epoch 33/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4058 - acc: 0.8218 - val_loss: 0.3491 - val_acc: 0.8552
Learning rate:  0.001
Epoch 34/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4031 - acc: 0.8216 - val_loss: 0.3482 - val_acc: 0.8531
Learning rate:  0.001
Epoch 35/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3967 - acc: 0.8261 - val_loss: 0.3665 - val_acc: 0.8485
Learning rate:  0.001
Epoch 36/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4030 - acc: 0.8237 - val_loss: 0.3596 - val_acc: 0.8493
Learning rate:  0.001
Epoch 37/300
924/924 [==============================] - 117s 126ms/step - loss: 0.4013 - acc: 0.8248 - val_loss: 0.3685 - val_acc: 0.8547
Learning rate:  0.001
Epoch 38/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3990 - acc: 0.8267 - val_loss: 0.3453 - val_acc: 0.8523
Learning rate:  0.001
Epoch 39/300
924/924 [==============================] - 116s 126ms/step - loss: 0.4005 - acc: 0.8231 - val_loss: 0.3527 - val_acc: 0.8436
Learning rate:  0.001
Epoch 40/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3963 - acc: 0.8263 - val_loss: 0.3413 - val_acc: 0.8582
Learning rate:  0.001
Epoch 41/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3962 - acc: 0.8292 - val_loss: 0.3467 - val_acc: 0.8501
Learning rate:  0.001
Epoch 42/300
924/924 [==============================] - 117s 126ms/step - loss: 0.3917 - acc: 0.8293 - val_loss: 0.3248 - val_acc: 0.8674
Learning rate:  0.001
Epoch 43/300
924/924 [==============================] - 117s 126ms/step - loss: 0.3911 - acc: 0.8287 - val_loss: 0.3449 - val_acc: 0.8747
Learning rate:  0.001
Epoch 44/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3884 - acc: 0.8291 - val_loss: 0.3380 - val_acc: 0.8580
Learning rate:  0.001
Epoch 45/300
924/924 [==============================] - 120s 130ms/step - loss: 0.3868 - acc: 0.8349 - val_loss: 0.3247 - val_acc: 0.8642
Learning rate:  0.001
Epoch 46/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3840 - acc: 0.8320 - val_loss: 0.3285 - val_acc: 0.8718
Learning rate:  0.001
Epoch 47/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3757 - acc: 0.8387 - val_loss: 0.3530 - val_acc: 0.8596
Learning rate:  0.001
Epoch 48/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3793 - acc: 0.8331 - val_loss: 0.3041 - val_acc: 0.8761
Learning rate:  0.001
Epoch 49/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3782 - acc: 0.8404 - val_loss: 0.3362 - val_acc: 0.8547
Learning rate:  0.001
Epoch 50/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3741 - acc: 0.8381 - val_loss: 0.3154 - val_acc: 0.8682
Learning rate:  0.001
Epoch 51/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3714 - acc: 0.8377 - val_loss: 0.3276 - val_acc: 0.8718
Learning rate:  0.001
Epoch 52/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3739 - acc: 0.8408 - val_loss: 0.3545 - val_acc: 0.8607
Learning rate:  0.001
Epoch 53/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3681 - acc: 0.8389 - val_loss: 0.3094 - val_acc: 0.8723
Learning rate:  0.001
Epoch 54/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3715 - acc: 0.8397 - val_loss: 0.3004 - val_acc: 0.8815
Learning rate:  0.001
Epoch 55/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3610 - acc: 0.8471 - val_loss: 0.3163 - val_acc: 0.8566
Learning rate:  0.001
Epoch 56/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3697 - acc: 0.8425 - val_loss: 0.3197 - val_acc: 0.8655
Learning rate:  0.001
Epoch 57/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3580 - acc: 0.8473 - val_loss: 0.2881 - val_acc: 0.8818
Learning rate:  0.001
Epoch 58/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3573 - acc: 0.8460 - val_loss: 0.4122 - val_acc: 0.8103
Learning rate:  0.001
Epoch 59/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3652 - acc: 0.8453 - val_loss: 0.3251 - val_acc: 0.8615
Learning rate:  0.001
Epoch 60/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3606 - acc: 0.8459 - val_loss: 0.2878 - val_acc: 0.8804
Learning rate:  0.001
Epoch 61/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3633 - acc: 0.8446 - val_loss: 0.2987 - val_acc: 0.8758
Learning rate:  0.001
Epoch 62/300
924/924 [==============================] - 117s 126ms/step - loss: 0.3571 - acc: 0.8506 - val_loss: 0.3641 - val_acc: 0.8442
Learning rate:  0.001
Epoch 63/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3533 - acc: 0.8483 - val_loss: 0.3158 - val_acc: 0.8696
Learning rate:  0.001
Epoch 64/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3544 - acc: 0.8516 - val_loss: 0.3009 - val_acc: 0.8745
Learning rate:  0.001
Epoch 65/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3514 - acc: 0.8517 - val_loss: 0.3872 - val_acc: 0.8379
Learning rate:  0.001
Epoch 66/300
924/924 [==============================] - 119s 129ms/step - loss: 0.3543 - acc: 0.8490 - val_loss: 0.2763 - val_acc: 0.8866
Learning rate:  0.001
Epoch 67/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3501 - acc: 0.8506 - val_loss: 0.2887 - val_acc: 0.8753
Learning rate:  0.001
Epoch 68/300
924/924 [==============================] - 119s 129ms/step - loss: 0.3535 - acc: 0.8498 - val_loss: 0.2802 - val_acc: 0.8877
Learning rate:  0.001
Epoch 69/300
924/924 [==============================] - 120s 130ms/step - loss: 0.3502 - acc: 0.8511 - val_loss: 0.3117 - val_acc: 0.8874
Learning rate:  0.001
Epoch 70/300
924/924 [==============================] - 120s 130ms/step - loss: 0.3476 - acc: 0.8553 - val_loss: 0.2715 - val_acc: 0.8888
Learning rate:  0.001
Epoch 71/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3471 - acc: 0.8540 - val_loss: 0.2786 - val_acc: 0.8847
Learning rate:  0.001
Epoch 72/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3418 - acc: 0.8588 - val_loss: 0.2658 - val_acc: 0.8937
Learning rate:  0.001
Epoch 73/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3406 - acc: 0.8529 - val_loss: 0.3010 - val_acc: 0.8780
Learning rate:  0.001
Epoch 74/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3408 - acc: 0.8568 - val_loss: 0.2745 - val_acc: 0.8920
Learning rate:  0.001
Epoch 75/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3407 - acc: 0.8561 - val_loss: 0.2842 - val_acc: 0.8834
Learning rate:  0.001
Epoch 76/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3376 - acc: 0.8576 - val_loss: 0.3228 - val_acc: 0.8653
Learning rate:  0.001
Epoch 77/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3397 - acc: 0.8610 - val_loss: 0.2763 - val_acc: 0.8885
Learning rate:  0.001
Epoch 78/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3338 - acc: 0.8613 - val_loss: 0.2812 - val_acc: 0.8845
Learning rate:  0.001
Epoch 79/300
924/924 [==============================] - 116s 125ms/step - loss: 0.3357 - acc: 0.8573 - val_loss: 0.2698 - val_acc: 0.8926
Learning rate:  0.001
Epoch 80/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3305 - acc: 0.8614 - val_loss: 0.2811 - val_acc: 0.8869
Learning rate:  0.001
Epoch 81/300
924/924 [==============================] - 116s 125ms/step - loss: 0.3316 - acc: 0.8605 - val_loss: 0.2884 - val_acc: 0.8839
Learning rate:  0.001
Epoch 82/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3337 - acc: 0.8605 - val_loss: 0.2775 - val_acc: 0.8918
Learning rate:  0.001
Epoch 83/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3342 - acc: 0.8617 - val_loss: 0.2586 - val_acc: 0.8988
Learning rate:  0.001
Epoch 84/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3353 - acc: 0.8625 - val_loss: 0.2545 - val_acc: 0.8988
Learning rate:  0.001
Epoch 85/300
924/924 [==============================] - 120s 129ms/step - loss: 0.3329 - acc: 0.8630 - val_loss: 0.2752 - val_acc: 0.8899
Learning rate:  0.001
Epoch 86/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3246 - acc: 0.8677 - val_loss: 0.2843 - val_acc: 0.8902
Learning rate:  0.001
Epoch 87/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3305 - acc: 0.8663 - val_loss: 0.2631 - val_acc: 0.8953
Learning rate:  0.001
Epoch 88/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3278 - acc: 0.8632 - val_loss: 0.2659 - val_acc: 0.8994
Learning rate:  0.001
Epoch 89/300
924/924 [==============================] - 116s 125ms/step - loss: 0.3223 - acc: 0.8685 - val_loss: 0.2627 - val_acc: 0.8950
Learning rate:  0.001
Epoch 90/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3284 - acc: 0.8644 - val_loss: 0.2775 - val_acc: 0.8918
Learning rate:  0.001
Epoch 91/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3278 - acc: 0.8655 - val_loss: 0.2923 - val_acc: 0.8831
Learning rate:  0.001
Epoch 92/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3235 - acc: 0.8644 - val_loss: 0.3082 - val_acc: 0.8693
Learning rate:  0.001
Epoch 93/300
924/924 [==============================] - 117s 126ms/step - loss: 0.3211 - acc: 0.8647 - val_loss: 0.2562 - val_acc: 0.8999
Learning rate:  0.001
Epoch 94/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3247 - acc: 0.8630 - val_loss: 0.2612 - val_acc: 0.9012
Learning rate:  0.001
Epoch 95/300
924/924 [==============================] - 116s 125ms/step - loss: 0.3219 - acc: 0.8686 - val_loss: 0.2649 - val_acc: 0.8931
Learning rate:  0.001
Epoch 96/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3171 - acc: 0.8654 - val_loss: 0.2894 - val_acc: 0.8831
Learning rate:  0.001
Epoch 97/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3235 - acc: 0.8642 - val_loss: 0.2855 - val_acc: 0.8815
Learning rate:  0.001
Epoch 98/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3265 - acc: 0.8645 - val_loss: 0.2864 - val_acc: 0.8915
Learning rate:  0.001
Epoch 99/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3222 - acc: 0.8700 - val_loss: 0.2615 - val_acc: 0.8977
Learning rate:  0.001
Epoch 100/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3222 - acc: 0.8650 - val_loss: 0.2687 - val_acc: 0.9029
Learning rate:  0.001
Epoch 101/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3191 - acc: 0.8661 - val_loss: 0.2566 - val_acc: 0.8980
Learning rate:  0.001
Epoch 102/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3230 - acc: 0.8658 - val_loss: 0.2726 - val_acc: 0.8966
Learning rate:  0.001
Epoch 103/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3183 - acc: 0.8657 - val_loss: 0.2478 - val_acc: 0.9026
Learning rate:  0.001
Epoch 104/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3191 - acc: 0.8687 - val_loss: 0.2936 - val_acc: 0.8799
Learning rate:  0.001
Epoch 105/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3185 - acc: 0.8690 - val_loss: 0.2517 - val_acc: 0.9037
Learning rate:  0.001
Epoch 106/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3196 - acc: 0.8663 - val_loss: 0.3586 - val_acc: 0.8479
Learning rate:  0.001
Epoch 107/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3227 - acc: 0.8665 - val_loss: 0.2774 - val_acc: 0.8918
Learning rate:  0.001
Epoch 108/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3195 - acc: 0.8690 - val_loss: 0.2500 - val_acc: 0.9048
Learning rate:  0.001
Epoch 109/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3148 - acc: 0.8728 - val_loss: 0.2696 - val_acc: 0.8996
Learning rate:  0.001
Epoch 110/300
924/924 [==============================] - 120s 129ms/step - loss: 0.3173 - acc: 0.8688 - val_loss: 0.2811 - val_acc: 0.8953
Learning rate:  0.001
Epoch 111/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3153 - acc: 0.8701 - val_loss: 0.4451 - val_acc: 0.8082
Learning rate:  0.001
Epoch 112/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3176 - acc: 0.8694 - val_loss: 0.2810 - val_acc: 0.8866
Learning rate:  0.001
Epoch 113/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3150 - acc: 0.8699 - val_loss: 0.2707 - val_acc: 0.8988
Learning rate:  0.001
Epoch 114/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3162 - acc: 0.8695 - val_loss: 0.2542 - val_acc: 0.9037
Learning rate:  0.001
Epoch 115/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3128 - acc: 0.8708 - val_loss: 0.2994 - val_acc: 0.8796
Learning rate:  0.001
Epoch 116/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3123 - acc: 0.8723 - val_loss: 0.2566 - val_acc: 0.8977
Learning rate:  0.001
Epoch 117/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3119 - acc: 0.8694 - val_loss: 0.2625 - val_acc: 0.8958
Learning rate:  0.001
Epoch 118/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3075 - acc: 0.8713 - val_loss: 0.2536 - val_acc: 0.9045
Learning rate:  0.001
Epoch 119/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3147 - acc: 0.8708 - val_loss: 0.2513 - val_acc: 0.9026
Learning rate:  0.001
Epoch 120/300
924/924 [==============================] - 116s 125ms/step - loss: 0.3166 - acc: 0.8745 - val_loss: 0.2768 - val_acc: 0.8966
Learning rate:  0.001
Epoch 121/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3113 - acc: 0.8739 - val_loss: 0.2369 - val_acc: 0.9088
Learning rate:  0.001
Epoch 122/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3119 - acc: 0.8738 - val_loss: 0.2951 - val_acc: 0.8837
Learning rate:  0.001
Epoch 123/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3128 - acc: 0.8742 - val_loss: 0.2429 - val_acc: 0.9069
Learning rate:  0.001
Epoch 124/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3026 - acc: 0.8742 - val_loss: 0.2333 - val_acc: 0.9088
Learning rate:  0.001
Epoch 125/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3116 - acc: 0.8721 - val_loss: 0.2511 - val_acc: 0.9012
Learning rate:  0.001
Epoch 126/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3032 - acc: 0.8770 - val_loss: 0.2403 - val_acc: 0.9056
Learning rate:  0.001
Epoch 127/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3095 - acc: 0.8744 - val_loss: 0.2457 - val_acc: 0.9007
Learning rate:  0.001
Epoch 128/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3138 - acc: 0.8697 - val_loss: 0.2844 - val_acc: 0.8856
Learning rate:  0.001
Epoch 129/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3110 - acc: 0.8730 - val_loss: 0.2533 - val_acc: 0.9031
Learning rate:  0.001
Epoch 130/300
924/924 [==============================] - 117s 126ms/step - loss: 0.3082 - acc: 0.8753 - val_loss: 0.2379 - val_acc: 0.9107
Learning rate:  0.001
Epoch 131/300
924/924 [==============================] - 119s 129ms/step - loss: 0.3092 - acc: 0.8765 - val_loss: 0.2348 - val_acc: 0.9080
Learning rate:  0.001
Epoch 132/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3042 - acc: 0.8770 - val_loss: 0.2358 - val_acc: 0.9077
Learning rate:  0.001
Epoch 133/300
924/924 [==============================] - 117s 127ms/step - loss: 0.3047 - acc: 0.8764 - val_loss: 0.2446 - val_acc: 0.8999
Learning rate:  0.001
Epoch 134/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3077 - acc: 0.8745 - val_loss: 0.2536 - val_acc: 0.9018
Learning rate:  0.001
Epoch 135/300
924/924 [==============================] - 116s 125ms/step - loss: 0.3000 - acc: 0.8784 - val_loss: 0.2561 - val_acc: 0.8953
Learning rate:  0.001
Epoch 136/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3012 - acc: 0.8772 - val_loss: 0.2446 - val_acc: 0.9061
Learning rate:  0.001
Epoch 137/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3029 - acc: 0.8765 - val_loss: 0.2627 - val_acc: 0.8923
Learning rate:  0.001
Epoch 138/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3025 - acc: 0.8777 - val_loss: 0.2485 - val_acc: 0.9002
Learning rate:  0.001
Epoch 139/300
924/924 [==============================] - 116s 126ms/step - loss: 0.3098 - acc: 0.8758 - val_loss: 0.2510 - val_acc: 0.8999
Learning rate:  0.001
Epoch 140/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3015 - acc: 0.8812 - val_loss: 0.2605 - val_acc: 0.9042
Learning rate:  0.001
Epoch 141/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3021 - acc: 0.8776 - val_loss: 0.3481 - val_acc: 0.8571
Learning rate:  0.001
Epoch 142/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2994 - acc: 0.8797 - val_loss: 0.2790 - val_acc: 0.8899
Learning rate:  0.001
Epoch 143/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2981 - acc: 0.8773 - val_loss: 0.2501 - val_acc: 0.9002
Learning rate:  0.001
Epoch 144/300
924/924 [==============================] - 118s 128ms/step - loss: 0.3027 - acc: 0.8761 - val_loss: 0.2488 - val_acc: 0.9118
Learning rate:  0.001
Epoch 145/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2984 - acc: 0.8794 - val_loss: 0.2407 - val_acc: 0.9042
Learning rate:  0.001
Epoch 146/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2981 - acc: 0.8793 - val_loss: 0.2465 - val_acc: 0.9034
Learning rate:  0.001
Epoch 147/300
924/924 [==============================] - 118s 127ms/step - loss: 0.2946 - acc: 0.8814 - val_loss: 0.2791 - val_acc: 0.8920
Learning rate:  0.001
Epoch 148/300
924/924 [==============================] - 116s 125ms/step - loss: 0.2972 - acc: 0.8814 - val_loss: 0.2627 - val_acc: 0.8966
Learning rate:  0.001
Epoch 149/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2946 - acc: 0.8803 - val_loss: 0.2347 - val_acc: 0.9096
Learning rate:  0.001
Epoch 150/300
924/924 [==============================] - 118s 128ms/step - loss: 0.2942 - acc: 0.8794 - val_loss: 0.2400 - val_acc: 0.9123
Learning rate:  0.001
Epoch 151/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2966 - acc: 0.8825 - val_loss: 0.2414 - val_acc: 0.9026
Learning rate:  0.0001
Epoch 152/300
924/924 [==============================] - 117s 127ms/step - loss: 0.2882 - acc: 0.8839 - val_loss: 0.2269 - val_acc: 0.9134
Learning rate:  0.0001
Epoch 153/300
924/924 [==============================] - 117s 127ms/step - loss: 0.2840 - acc: 0.8866 - val_loss: 0.2285 - val_acc: 0.9083
Learning rate:  0.0001
Epoch 154/300
924/924 [==============================] - 116s 125ms/step - loss: 0.2832 - acc: 0.8853 - val_loss: 0.2355 - val_acc: 0.9091
Learning rate:  0.0001
Epoch 155/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2727 - acc: 0.8895 - val_loss: 0.2290 - val_acc: 0.9126
Learning rate:  0.0001
Epoch 156/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2796 - acc: 0.8872 - val_loss: 0.2284 - val_acc: 0.9126
Learning rate:  0.0001
Epoch 157/300
924/924 [==============================] - 117s 126ms/step - loss: 0.2801 - acc: 0.8886 - val_loss: 0.2222 - val_acc: 0.9134
Learning rate:  0.0001
Epoch 158/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2772 - acc: 0.8881 - val_loss: 0.2256 - val_acc: 0.9129
Learning rate:  0.0001
Epoch 159/300
924/924 [==============================] - 118s 128ms/step - loss: 0.2782 - acc: 0.8876 - val_loss: 0.2251 - val_acc: 0.9137
Learning rate:  0.0001
Epoch 160/300
924/924 [==============================] - 117s 126ms/step - loss: 0.2760 - acc: 0.8910 - val_loss: 0.2271 - val_acc: 0.9140
Learning rate:  0.0001
Epoch 161/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2705 - acc: 0.8899 - val_loss: 0.2330 - val_acc: 0.9113
Learning rate:  0.0001
Epoch 162/300
924/924 [==============================] - 117s 126ms/step - loss: 0.2757 - acc: 0.8909 - val_loss: 0.2247 - val_acc: 0.9137
Learning rate:  0.0001
Epoch 163/300
924/924 [==============================] - 118s 128ms/step - loss: 0.2783 - acc: 0.8872 - val_loss: 0.2329 - val_acc: 0.9104
Learning rate:  0.0001
Epoch 164/300
924/924 [==============================] - 118s 128ms/step - loss: 0.2726 - acc: 0.8900 - val_loss: 0.2243 - val_acc: 0.9129
Learning rate:  0.0001
Epoch 165/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2731 - acc: 0.8932 - val_loss: 0.2324 - val_acc: 0.9115
Learning rate:  0.0001
Epoch 166/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2760 - acc: 0.8912 - val_loss: 0.2283 - val_acc: 0.9126
Learning rate:  0.0001
Epoch 167/300
924/924 [==============================] - 117s 126ms/step - loss: 0.2734 - acc: 0.8912 - val_loss: 0.2240 - val_acc: 0.9156
Learning rate:  0.0001
Epoch 168/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2774 - acc: 0.8883 - val_loss: 0.2318 - val_acc: 0.9115
Learning rate:  0.0001
Epoch 169/300
924/924 [==============================] - 118s 128ms/step - loss: 0.2795 - acc: 0.8883 - val_loss: 0.2248 - val_acc: 0.9126
Learning rate:  0.0001
Epoch 170/300
924/924 [==============================] - 119s 128ms/step - loss: 0.2734 - acc: 0.8929 - val_loss: 0.2202 - val_acc: 0.9159
Learning rate:  0.0001
Epoch 171/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2759 - acc: 0.8902 - val_loss: 0.2319 - val_acc: 0.9094
Learning rate:  0.0001
Epoch 172/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2689 - acc: 0.8967 - val_loss: 0.2219 - val_acc: 0.9150
Learning rate:  0.0001
Epoch 173/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2707 - acc: 0.8931 - val_loss: 0.2266 - val_acc: 0.9121
Learning rate:  0.0001
Epoch 174/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2736 - acc: 0.8892 - val_loss: 0.2232 - val_acc: 0.9140
Learning rate:  0.0001
Epoch 175/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2743 - acc: 0.8905 - val_loss: 0.2260 - val_acc: 0.9126
Learning rate:  0.0001
Epoch 176/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2755 - acc: 0.8962 - val_loss: 0.2245 - val_acc: 0.9134
Learning rate:  0.0001
Epoch 177/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2754 - acc: 0.8910 - val_loss: 0.2359 - val_acc: 0.9107
Learning rate:  0.0001
Epoch 178/300
924/924 [==============================] - 117s 127ms/step - loss: 0.2709 - acc: 0.8943 - val_loss: 0.2237 - val_acc: 0.9137
Learning rate:  0.0001
Epoch 179/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2721 - acc: 0.8888 - val_loss: 0.2265 - val_acc: 0.9131
Learning rate:  0.0001
Epoch 180/300
924/924 [==============================] - 116s 126ms/step - loss: 0.2691 - acc: 0.8914 - val_loss: 0.2230 - val_acc: 0.9142
Learning rate:  0.0001
Epoch 181/300
924/924 [==============================] - 118s 127ms/step - loss: 0.2661 - acc: 0.8947 - val_loss: 0.2262 - val_acc: 0.9102
Learning rate:  0.0001
Epoch 182/300
849/924 [==========================>...] - ETA: 8s - loss: 0.2698 - acc: 0.8957
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-21-e113ae53ac1e> in <module>
     23                     verbose=1,
     24                     steps_per_epoch=len(trDat)/16,
---> 25                     callbacks=callbacks_list)

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1777         use_multiprocessing=use_multiprocessing,
   1778         shuffle=shuffle,
-> 1779         initial_epoch=initial_epoch)
   1780 
   1781   def evaluate_generator(self,

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\engine\training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
    202 
    203         outs = model.train_on_batch(
--> 204             x, y, sample_weight=sample_weight, class_weight=class_weight)
    205 
    206         if not isinstance(outs, list):

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\engine\training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1550 
   1551       self._make_train_function()
-> 1552       outputs = self.train_function(ins)
   1553 
   1554     if len(outputs) == 1:

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\backend.py in __call__(self, inputs)
   2912       self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
   2913 
-> 2914     fetched = self._callable_fn(*array_vals)
   2915     self._call_fetch_callbacks(fetched[-len(self._fetches):])
   2916     return fetched[:len(self.outputs)]

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\client\session.py in __call__(self, *args, **kwargs)
   1380           ret = tf_session.TF_SessionRunCallable(
   1381               self._session._session, self._handle, args, status,
-> 1382               run_metadata_ptr)
   1383         if run_metadata:
   1384           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

KeyboardInterrupt: 
In [22]:
# Training is finished (interrupted at epoch 182); reload the best
# checkpointed weights from `filepath` into the evaluation model and
# compile it so predict/evaluate can be run.  The optimizer choice is
# irrelevant here since no further training is performed.
modelGo.load_weights(filepath)
modelGo.compile(optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
In [23]:
# Run the restored model over the held-out test set and summarise its
# performance with sklearn's metric utilities.
predicts = modelGo.predict(tsDat)

# Collapse softmax outputs and one-hot labels to class indices so that
# they can be fed to the sklearn classification report.
predout = np.argmax(predicts, axis=1)
testout = np.argmax(tsLbl, axis=1)

# Human-readable class names for the classification report
# (index 0 = non-flower, index 1 = flower).
labelname = ['non-flower', 'flower']

testScores = metrics.accuracy_score(testout, predout)
confusion  = metrics.confusion_matrix(testout, predout)

print("Best accuracy (on testing dataset): %.2f%%" % (testScores * 100))
print(metrics.classification_report(testout, predout,
                                    target_names=labelname, digits=4))
print(confusion)
Best accuracy (on testing dataset): 91.59%
              precision    recall  f1-score   support

  non-flower     0.8640    0.9315    0.8965      1446
      flower     0.9537    0.9058    0.9291      2250

    accuracy                         0.9159      3696
   macro avg     0.9088    0.9187    0.9128      3696
weighted avg     0.9186    0.9159    0.9164      3696

[[1347   99]
 [ 212 2038]]
In [24]:
import pandas as pd

# Visualise the training history written by CSVLogger: loss curves in
# the top panel, accuracy curves below.  In each panel the validation
# series is plotted first, then the training series.
records = pd.read_csv(modelname + '.csv')

plt.figure()

# -- top panel: loss -------------------------------------------------
plt.subplot(211)
plt.plot(records['val_loss'])
plt.plot(records['loss'])
plt.yticks([0, 0.20, 0.30, 0.4, 0.5])
plt.title('Loss value', fontsize=12)
plt.gca().set_xticklabels([])   # x-axis shared with panel below; hide labels here

# -- bottom panel: accuracy ------------------------------------------
plt.subplot(212)
plt.plot(records['val_acc'])
plt.plot(records['acc'])
plt.yticks([0.7, 0.8, 0.9, 1.0])
plt.title('Accuracy', fontsize=12)
plt.show()
In [25]:
# Collect the indices of every misclassified test sample.
# Idiom fix: replaces the manual append loop with a vectorised numpy
# comparison; np.flatnonzero returns the positions where the predicted
# class disagrees with the ground-truth class.  .tolist() keeps the
# result a plain Python list of ints, as downstream cells expect.
wrong_ans_index = np.flatnonzero(np.asarray(predout) != np.asarray(testout)).tolist()
In [26]:
wrong_ans_index = list(set(wrong_ans_index))
In [ ]:
# Display every misclassified test image together with its predicted
# and actual class index (0 = non-flower, 1 = flower).

# Source of the images to display — presumably the un-normalised copy
# of the test set; swap in another array (flowers/fungus/rocks) to
# inspect a different collection.
dataset = tsDatOrg

for index in wrong_ans_index:
    print("Showing %s index image" % (index))
    print("Predicted as %s but is actually %s" % (predout[index], testout[index]))
    # BUG FIX: the original called plt.imshow(data[index]), but `data`
    # is never defined in this notebook — it must index `dataset`,
    # the image array selected above.
    plt.imshow(dataset[index])
    plt.show()
Showing 3155 index image
Predicted as 1 but is actually 0
Showing 2051 index image
Predicted as 0 but is actually 1
Showing 4 index image
Predicted as 0 but is actually 1
Showing 3588 index image
Predicted as 1 but is actually 0
Showing 3080 index image
Predicted as 1 but is actually 0
Showing 17 index image
Predicted as 0 but is actually 1
Showing 2583 index image
Predicted as 1 but is actually 0
Showing 536 index image
Predicted as 0 but is actually 1
Showing 2584 index image
Predicted as 1 but is actually 0
Showing 2585 index image
Predicted as 1 but is actually 0
Showing 3610 index image
Predicted as 1 but is actually 0
Showing 3668 index image
Predicted as 1 but is actually 0
Showing 2589 index image
Predicted as 1 but is actually 0
Showing 32 index image
Predicted as 0 but is actually 1
Showing 33 index image
Predicted as 0 but is actually 1
Showing 34 index image
Predicted as 0 but is actually 1
Showing 1572 index image
Predicted as 0 but is actually 1
Showing 37 index image
Predicted as 0 but is actually 1
Showing 1065 index image
Predicted as 0 but is actually 1
Showing 3625 index image
Predicted as 1 but is actually 0
Showing 2044 index image
Predicted as 0 but is actually 1
Showing 2094 index image
Predicted as 0 but is actually 1
Showing 2606 index image
Predicted as 1 but is actually 0
Showing 2610 index image
Predicted as 1 but is actually 0
Showing 3063 index image
Predicted as 1 but is actually 0
Showing 52 index image
Predicted as 0 but is actually 1
Showing 564 index image
Predicted as 0 but is actually 1
Showing 1079 index image
Predicted as 0 but is actually 1
Showing 571 index image
Predicted as 0 but is actually 1
Showing 61 index image
Predicted as 0 but is actually 1
Showing 2111 index image
Predicted as 0 but is actually 1
Showing 576 index image
Predicted as 0 but is actually 1
Showing 3136 index image
Predicted as 1 but is actually 0
Showing 3141 index image
Predicted as 1 but is actually 0
Showing 70 index image
Predicted as 0 but is actually 1
Showing 1606 index image
Predicted as 0 but is actually 1
Showing 2632 index image
Predicted as 1 but is actually 0
Showing 3656 index image
Predicted as 1 but is actually 0
Showing 75 index image
Predicted as 0 but is actually 1
Showing 587 index image
Predicted as 0 but is actually 1
Showing 3068 index image
Predicted as 1 but is actually 0
Showing 2127 index image
Predicted as 0 but is actually 1
Showing 1616 index image
Predicted as 0 but is actually 1
Showing 1618 index image
Predicted as 0 but is actually 1
Showing 83 index image
Predicted as 0 but is actually 1
Showing 1619 index image
Predicted as 0 but is actually 1
Showing 597 index image
Predicted as 0 but is actually 1
Showing 1109 index image
Predicted as 0 but is actually 1
Showing 2132 index image
Predicted as 0 but is actually 1
Showing 88 index image
Predicted as 0 but is actually 1
Showing 89 index image
Predicted as 0 but is actually 1
Showing 601 index image
Predicted as 0 but is actually 1
Showing 1114 index image
Predicted as 0 but is actually 1
Showing 2134 index image
Predicted as 0 but is actually 1
Showing 2135 index image
Predicted as 0 but is actually 1
Showing 2651 index image
Predicted as 1 but is actually 0
Showing 3680 index image
Predicted as 1 but is actually 0
Showing 2658 index image
Predicted as 1 but is actually 0
Showing 2663 index image
Predicted as 1 but is actually 0
Showing 3177 index image
Predicted as 1 but is actually 0
Showing 1642 index image
Predicted as 0 but is actually 1
Showing 621 index image
Predicted as 0 but is actually 1
Showing 1645 index image
Predicted as 0 but is actually 1
Showing 3183 index image
Predicted as 1 but is actually 0
Showing 631 index image
Predicted as 0 but is actually 1
Showing 633 index image
Predicted as 0 but is actually 1
Showing 2683 index image
Predicted as 1 but is actually 0
Showing 636 index image
Predicted as 0 but is actually 1
Showing 2172 index image
Predicted as 0 but is actually 1
Showing 2176 index image
Predicted as 0 but is actually 1
Showing 1153 index image
Predicted as 0 but is actually 1
Showing 130 index image
Predicted as 0 but is actually 1
In [ ]:
# TODO: experiment with stacking/ensembling 3 neural networks to improve accuracy?